In [1]:
import os
import numpy as np
import pandas as pd
import cv2
import matplotlib.pyplot as plt
#import scienceplots
import tensorflow as tf
from sklearn.model_selection import train_test_split
import random
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping, TensorBoard
import tensorflow.keras.callbacks
from keras import backend as K
from keras.regularizers import l2
import albumentations as A
In [2]:
train_transforms = A.Compose([
A.VerticalFlip(),
A.HorizontalFlip(),
A.Rotate(limit=30),
A.GaussNoise(var_limit=0.01)
])
val_transforms = A.Compose([
A.NoOp()
])
In [3]:
image = cv2.imread(r"C:\Users\stepan\ML\datasets\multiclass_semantic_liver-tumor\Train_Ct-Scan\volume-0_slice_56.jpg", cv2.IMREAD_GRAYSCALE)
hist = cv2.calcHist([image], [0], None, [256], [0, 256])
plt.hist(image.flatten(), bins=50, color='blue')
plt.xlabel('Pixel Intensity')
plt.ylabel('Frequency')
plt.title('Pixel Intensity Distribution')
plt.show()
In [4]:
mask = cv2.imread(r"C:\Users\stepan\ML\datasets\multiclass_semantic_liver-tumor\Train_Ct-Scan_masks\volume-0_slice_56.tiff", cv2.IMREAD_GRAYSCALE)
plt.hist(mask.flatten(), bins=30, color='blue')
plt.xlabel('Pixel Values')
plt.ylabel('Frequency')
plt.title('Pixel Values Distribution')
plt.show()
In [5]:
def visualize(**images):
n = len(images)
plt.figure(figsize=(9, 9))
for i, (name, image) in enumerate(images.items()):
plt.subplot(1, n, i + 1)
plt.title(' '.join(name.split('_')).title())
if 'mask' in name:
if image.ndim == 3 and image.shape[-1] > 1:
image = np.argmax(image, axis=-1)
plt.imshow(image, cmap = 'bone')
plt.show()
In [6]:
def read_image(x):
x = cv2.imread(x, cv2.IMREAD_GRAYSCALE)
x = x / 255.0
x = x.astype(np.float32)
x = np.expand_dims(x, axis=-1)
x = np.repeat(x, 3, axis=-1)
return x
def read_mask(x):
x = cv2.imread(x, cv2.IMREAD_GRAYSCALE)
x = x.astype(np.int32)
return x
In [7]:
def load_data(x_path, y_path):
images = os.listdir(x_path)
masks = os.listdir(y_path)
X = [os.path.join(x_path, image) for image in images]
y = [os.path.join(y_path, mask) for mask in masks]
train_x, val_x, train_y, val_y = train_test_split(X, y, test_size=0.2, random_state=42)
train_x, test_x, train_y, test_y = train_test_split(train_x, train_y, test_size=0.25, random_state=42)
return train_x, train_y, val_x, val_y, test_x, test_y
In [8]:
def tf_dataset(x, y, transformations, batch=8):
dataset = tf.data.Dataset.from_tensor_slices((x,y))
dataset = dataset.shuffle(buffer_size=len(x))
dataset = dataset.map(lambda x, y: preprocess(x, y, transformations))
dataset = dataset.batch(batch)
dataset = dataset.repeat()
dataset = dataset.prefetch(2)
return dataset
def preprocess(x, y, transformations):
def aug(x,y):
x = x.decode()
y = y.decode()
image = read_image(x)
mask = read_mask(y)
augmented = transformations(image=image,mask=mask)
aug_img=augmented['image']
aug_mask = augmented['mask']
return aug_img, aug_mask
image, mask = tf.numpy_function(aug, [x,y], [tf.float32, tf.int32])
mask = tf.one_hot(mask, 3, dtype = tf.int32) #обязательно так как сегментация мультиклассовая
image.set_shape([512,512,3])
mask.set_shape([512,512,3])
return image, mask
In [9]:
# def dice_coef(y_true, y_pred, smooth=1):
# y_true_f = K.flatten(y_true)
# y_pred_f = K.flatten(y_pred)
# intersection = K.sum(y_true_f * y_pred_f)
# return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
# def dice_coef_3Class(y_true, y_pred, numLabels=3):
# dice=0
# for index in range(numLabels):
# dice += dice_coef(y_true[:,:,:,index], y_pred[:,:,:,index])
# return dice/numLabels
In [10]:
Ncl = 3
w = K.zeros(shape=(Ncl,))
def generalized_dice_coeff(y_true, y_pred):
y_pred = K.cast(y_pred, 'float32')
y_true = K.cast(y_true, 'float32')
w = K.sum(y_true, axis=(0,1,2))
w = 1.0/(w**2+0.000001)
numerator = y_true*y_pred
numerator = w*K.sum(numerator,(0,1,2))
numerator = K.sum(numerator)
denominator = y_true+y_pred
denominator = w*K.sum(denominator,(0,1,2))
denominator = K.sum(denominator)
gen_dice_coef = 2*numerator/denominator
return gen_dice_coef
def generalized_dice_loss(y_true, y_pred):
return 1 - generalized_dice_coeff(y_true, y_pred)
In [11]:
class LossHistory(tensorflow.keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_epoch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
Гиперпараметры для моделей¶
In [12]:
IMG_HEIGHT = 512
IMG_WIDTH = 512
IMG_CHANNELS = 3
n_classes = 3
LR = 1e-4
epochs = 50
Формирование датасета и Визуализация¶
In [13]:
np.random.seed(42)
tf.random.set_seed(42)
batch_size = 4
images_path = r"C:\Users\stepan\ML\datasets\multiclass_semantic_liver-tumor\Train_Ct-Scan"
masks_path = r"C:\Users\stepan\ML\datasets\multiclass_semantic_liver-tumor\Train_Ct-Scan_masks"
train_x, train_y, val_x, val_y, test_x, test_y = load_data(images_path, masks_path)
train_ds = tf_dataset(train_x, train_y, transformations=train_transforms, batch=batch_size)
val_ds = tf_dataset(val_x, val_y, transformations=val_transforms, batch=batch_size)
In [14]:
for images, labels in train_ds.take(1):
numpy_images = images.numpy()
numpy_masks = labels.numpy()
print(numpy_masks.shape)
for i in range(numpy_images.shape[0]):
print(i)
visualize(image = numpy_images[i, ...], mask = numpy_masks[i, ...])
(4, 512, 512, 3) 0
1
2
3
In [15]:
train_steps = len(train_x)//batch_size
valid_steps = len(val_x)//batch_size
модель классический Unet¶
In [16]:
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, Concatenate, concatenate, Conv2DTranspose, Activation, BatchNormalization, Dropout, Lambda
def UNet(n_classes=3, IMG_HEIGHT=512, IMG_WIDTH=512, IMG_CHANNELS=3):
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
#s = Lambda(lambda x: x / 255)(inputs)
s = inputs
#contraction path
c1 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(s)
c1 = Dropout(0.1)(c1)
c1 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c1)
p1 = MaxPooling2D((2, 2))(c1)
c2 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p1)
c2 = Dropout(0.1)(c2)
c2 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c2)
p2 = MaxPooling2D((2, 2))(c2)
c3 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p2)
c3 = Dropout(0.2)(c3)
c3 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c3)
p3 = MaxPooling2D((2, 2))(c3)
c4 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p3)
c4 = Dropout(0.2)(c4)
c4 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c4)
p4 = MaxPooling2D(pool_size=(2, 2))(c4)
#bottleneck
c5 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p4)
c5 = Dropout(0.3)(c5)
c5 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c5)
#expansion path
u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)
u6 = concatenate([u6, c4])
c6 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u6)
c6 = Dropout(0.2)(c6)
c6 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c6)
u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)
u7 = concatenate([u7, c3])
c7 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u7)
c7 = Dropout(0.2)(c7)
c7 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c7)
u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)
u8 = concatenate([u8, c2])
c8 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u8)
c8 = Dropout(0.1)(c8)
c8 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c8)
u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)
u9 = concatenate([u9, c1], axis=3)
c9 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u9)
c9 = Dropout(0.1)(c9)
c9 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c9)
outputs = Conv2D(n_classes, (1, 1), activation='softmax')(c9)
model = Model(inputs=[inputs], outputs=[outputs], name='UNET')
return model
Модель resunet plus plus¶
In [16]:
import tensorflow.keras.layers as L
from tensorflow.keras.models import Model
from keras.layers import Multiply, Add, GlobalAveragePooling2D, Reshape, Dense
def SE(inputs, ratio=8):
channel_axis = -1
num_filters = inputs.shape[channel_axis]
se_shape = (1, 1, num_filters)
x = GlobalAveragePooling2D()(inputs)
x = Reshape(se_shape)(x)
x = Dense(num_filters // ratio, activation='relu', use_bias=False)(x)
x = Dense(num_filters, activation='sigmoid', use_bias=False)(x)
x = Multiply()([inputs, x])
return x
def stem_block(inputs, num_filters):
## Conv 1
x = Conv2D(num_filters, 3, padding="same")(inputs)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(num_filters, 3, padding="same")(x)
## Shortcut
s = Conv2D(num_filters, 1, padding="same")(inputs)
## Add
x = Add()([x, s])
return x
def resnet_block(inputs, num_filters, strides=1):
## SE
inputs = SE(inputs)
## Conv 1
x = BatchNormalization()(inputs)
x = Activation("relu")(x)
x = Conv2D(num_filters, 3, padding="same", strides=strides)(x)
## Conv 2
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(num_filters, 3, padding="same", strides=1)(x)
## Shortcut
s = Conv2D(num_filters, 1, padding="same", strides=strides)(inputs)
## Add
x = Add()([x, s])
return x
def aspp_block(inputs, num_filters):
x1 = Conv2D(num_filters, 3, dilation_rate=6, padding="same")(inputs)
x1 = BatchNormalization()(x1)
x2 = Conv2D(num_filters, 3, dilation_rate=12, padding="same")(inputs)
x2 = BatchNormalization()(x2)
x3 = Conv2D(num_filters, 3, dilation_rate=18, padding="same")(inputs)
x3 = BatchNormalization()(x3)
x4 = Conv2D(num_filters, (3, 3), padding="same")(inputs)
x4 = BatchNormalization()(x4)
y = Add()([x1, x2, x3, x4])
y = Conv2D(num_filters, 1, padding="same")(y)
return y
def attetion_block(x1, x2):
num_filters = x2.shape[-1]
x1_conv = BatchNormalization()(x1)
x1_conv = Activation("relu")(x1_conv)
x1_conv = Conv2D(num_filters, 3, padding="same")(x1_conv)
x1_pool = MaxPooling2D((2, 2))(x1_conv)
x2_conv = BatchNormalization()(x2)
x2_conv = Activation("relu")(x2_conv)
x2_conv = Conv2D(num_filters, 3, padding="same")(x2_conv)
x = Add()([x1_pool, x2_conv])
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(num_filters, 3, padding="same")(x)
x = Multiply()([x, x2])
return x
def resunet_pp(n_classes, IMG_HEIGHT=IMG_HEIGHT, IMG_WIDTH=IMG_WIDTH, IMG_CHANNELS=IMG_CHANNELS):
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
# encoding
c1 = stem_block(inputs, 16)
c2 = resnet_block(c1, 32, strides=2)
c3 = resnet_block(c2, 64, strides=2)
c4 = resnet_block(c3, 128, strides=2)
# bridge
b1 = aspp_block(c4, 256)
# decoding
d1 = attetion_block(c3, b1)
d1 = UpSampling2D((2, 2))(d1)
d1 = Concatenate()([d1, c3])
d1 = resnet_block(d1, 128)
d2 = attetion_block(c2, d1)
d2 = UpSampling2D((2, 2))(d2)
d2 = Concatenate()([d2, c2])
d2 = resnet_block(d2, 64)
d3 = attetion_block(c1, d2)
d3 = UpSampling2D((2, 2))(d3)
d3 = Concatenate()([d3, c1])
d3 = resnet_block(d3, 32)
outputs = aspp_block(d3, 16)
outputs = Conv2D(n_classes, 1, padding="same")(outputs)
outputs = Activation("softmax")(outputs)
model = Model(inputs, outputs, name='RESUNET_pp')
return model
VGG16 unet
In [18]:
from tensorflow.keras.applications import VGG16
def conv_block(input, num_filters):
x = Conv2D(num_filters, 3, padding="same")(input)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(num_filters, 3, padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
return x
def decoder_block(input, skip_features, num_filters):
x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(input)
x = Concatenate()([x, skip_features])
x = conv_block(x, num_filters)
return x
def vgg16_unet(n_classes, IMG_HEIGHT=IMG_HEIGHT, IMG_WIDTH=IMG_WIDTH, IMG_CHANNELS=IMG_CHANNELS):
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
vgg16 = VGG16(include_top=False, weights="imagenet", input_tensor=inputs)
s1 = vgg16.get_layer("block1_conv2").output
s2 = vgg16.get_layer("block2_conv2").output
s3 = vgg16.get_layer("block3_conv3").output
s4 = vgg16.get_layer("block4_conv3").output
b1 = vgg16.get_layer("block5_conv3").output
d1 = decoder_block(b1, s4, 512)
d2 = decoder_block(d1, s3, 256)
d3 = decoder_block(d2, s2, 128)
d4 = decoder_block(d3, s1, 64)
outputs = Conv2D(n_classes, 1, padding="same", activation="softmax")(d4)
model = Model(inputs, outputs, name="VGG16_UNet")
return model
In [19]:
unet_model = UNet(n_classes=n_classes, IMG_HEIGHT=IMG_HEIGHT, IMG_WIDTH=IMG_WIDTH, IMG_CHANNELS=IMG_CHANNELS)
unet_model.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(LR), metrics = [tf.keras.metrics.OneHotMeanIoU(num_classes=3)])
unet_model.summary()
Model: "UNET"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 512, 512, 3 0 []
)]
conv2d (Conv2D) (None, 512, 512, 16 448 ['input_1[0][0]']
)
dropout (Dropout) (None, 512, 512, 16 0 ['conv2d[0][0]']
)
conv2d_1 (Conv2D) (None, 512, 512, 16 2320 ['dropout[0][0]']
)
max_pooling2d (MaxPooling2D) (None, 256, 256, 16 0 ['conv2d_1[0][0]']
)
conv2d_2 (Conv2D) (None, 256, 256, 32 4640 ['max_pooling2d[0][0]']
)
dropout_1 (Dropout) (None, 256, 256, 32 0 ['conv2d_2[0][0]']
)
conv2d_3 (Conv2D) (None, 256, 256, 32 9248 ['dropout_1[0][0]']
)
max_pooling2d_1 (MaxPooling2D) (None, 128, 128, 32 0 ['conv2d_3[0][0]']
)
conv2d_4 (Conv2D) (None, 128, 128, 64 18496 ['max_pooling2d_1[0][0]']
)
dropout_2 (Dropout) (None, 128, 128, 64 0 ['conv2d_4[0][0]']
)
conv2d_5 (Conv2D) (None, 128, 128, 64 36928 ['dropout_2[0][0]']
)
max_pooling2d_2 (MaxPooling2D) (None, 64, 64, 64) 0 ['conv2d_5[0][0]']
conv2d_6 (Conv2D) (None, 64, 64, 128) 73856 ['max_pooling2d_2[0][0]']
dropout_3 (Dropout) (None, 64, 64, 128) 0 ['conv2d_6[0][0]']
conv2d_7 (Conv2D) (None, 64, 64, 128) 147584 ['dropout_3[0][0]']
max_pooling2d_3 (MaxPooling2D) (None, 32, 32, 128) 0 ['conv2d_7[0][0]']
conv2d_8 (Conv2D) (None, 32, 32, 256) 295168 ['max_pooling2d_3[0][0]']
dropout_4 (Dropout) (None, 32, 32, 256) 0 ['conv2d_8[0][0]']
conv2d_9 (Conv2D) (None, 32, 32, 256) 590080 ['dropout_4[0][0]']
conv2d_transpose (Conv2DTransp (None, 64, 64, 128) 131200 ['conv2d_9[0][0]']
ose)
concatenate (Concatenate) (None, 64, 64, 256) 0 ['conv2d_transpose[0][0]',
'conv2d_7[0][0]']
conv2d_10 (Conv2D) (None, 64, 64, 128) 295040 ['concatenate[0][0]']
dropout_5 (Dropout) (None, 64, 64, 128) 0 ['conv2d_10[0][0]']
conv2d_11 (Conv2D) (None, 64, 64, 128) 147584 ['dropout_5[0][0]']
conv2d_transpose_1 (Conv2DTran (None, 128, 128, 64 32832 ['conv2d_11[0][0]']
spose) )
concatenate_1 (Concatenate) (None, 128, 128, 12 0 ['conv2d_transpose_1[0][0]',
8) 'conv2d_5[0][0]']
conv2d_12 (Conv2D) (None, 128, 128, 64 73792 ['concatenate_1[0][0]']
)
dropout_6 (Dropout) (None, 128, 128, 64 0 ['conv2d_12[0][0]']
)
conv2d_13 (Conv2D) (None, 128, 128, 64 36928 ['dropout_6[0][0]']
)
conv2d_transpose_2 (Conv2DTran (None, 256, 256, 32 8224 ['conv2d_13[0][0]']
spose) )
concatenate_2 (Concatenate) (None, 256, 256, 64 0 ['conv2d_transpose_2[0][0]',
) 'conv2d_3[0][0]']
conv2d_14 (Conv2D) (None, 256, 256, 32 18464 ['concatenate_2[0][0]']
)
dropout_7 (Dropout) (None, 256, 256, 32 0 ['conv2d_14[0][0]']
)
conv2d_15 (Conv2D) (None, 256, 256, 32 9248 ['dropout_7[0][0]']
)
conv2d_transpose_3 (Conv2DTran (None, 512, 512, 16 2064 ['conv2d_15[0][0]']
spose) )
concatenate_3 (Concatenate) (None, 512, 512, 32 0 ['conv2d_transpose_3[0][0]',
) 'conv2d_1[0][0]']
conv2d_16 (Conv2D) (None, 512, 512, 16 4624 ['concatenate_3[0][0]']
)
dropout_8 (Dropout) (None, 512, 512, 16 0 ['conv2d_16[0][0]']
)
conv2d_17 (Conv2D) (None, 512, 512, 16 2320 ['dropout_8[0][0]']
)
conv2d_18 (Conv2D) (None, 512, 512, 3) 51 ['conv2d_17[0][0]']
==================================================================================================
Total params: 1,941,139
Trainable params: 1,941,139
Non-trainable params: 0
__________________________________________________________________________________________________
In [17]:
resunet_pp_model = resunet_pp(n_classes=n_classes, IMG_HEIGHT=IMG_HEIGHT, IMG_WIDTH=IMG_WIDTH, IMG_CHANNELS=IMG_CHANNELS)
resunet_pp_model.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(LR), metrics = [tf.keras.metrics.OneHotMeanIoU(num_classes=3)])
resunet_pp_model.summary()
Model: "RESUNET_pp"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 512, 512, 3 0 []
)]
conv2d (Conv2D) (None, 512, 512, 16 448 ['input_1[0][0]']
)
batch_normalization (BatchNorm (None, 512, 512, 16 64 ['conv2d[0][0]']
alization) )
activation (Activation) (None, 512, 512, 16 0 ['batch_normalization[0][0]']
)
conv2d_1 (Conv2D) (None, 512, 512, 16 2320 ['activation[0][0]']
)
conv2d_2 (Conv2D) (None, 512, 512, 16 64 ['input_1[0][0]']
)
add (Add) (None, 512, 512, 16 0 ['conv2d_1[0][0]',
) 'conv2d_2[0][0]']
global_average_pooling2d (Glob (None, 16) 0 ['add[0][0]']
alAveragePooling2D)
reshape (Reshape) (None, 1, 1, 16) 0 ['global_average_pooling2d[0][0]'
]
dense (Dense) (None, 1, 1, 2) 32 ['reshape[0][0]']
dense_1 (Dense) (None, 1, 1, 16) 32 ['dense[0][0]']
multiply (Multiply) (None, 512, 512, 16 0 ['add[0][0]',
) 'dense_1[0][0]']
batch_normalization_1 (BatchNo (None, 512, 512, 16 64 ['multiply[0][0]']
rmalization) )
activation_1 (Activation) (None, 512, 512, 16 0 ['batch_normalization_1[0][0]']
)
conv2d_3 (Conv2D) (None, 256, 256, 32 4640 ['activation_1[0][0]']
)
batch_normalization_2 (BatchNo (None, 256, 256, 32 128 ['conv2d_3[0][0]']
rmalization) )
activation_2 (Activation) (None, 256, 256, 32 0 ['batch_normalization_2[0][0]']
)
conv2d_4 (Conv2D) (None, 256, 256, 32 9248 ['activation_2[0][0]']
)
conv2d_5 (Conv2D) (None, 256, 256, 32 544 ['multiply[0][0]']
)
add_1 (Add) (None, 256, 256, 32 0 ['conv2d_4[0][0]',
) 'conv2d_5[0][0]']
global_average_pooling2d_1 (Gl (None, 32) 0 ['add_1[0][0]']
obalAveragePooling2D)
reshape_1 (Reshape) (None, 1, 1, 32) 0 ['global_average_pooling2d_1[0][0
]']
dense_2 (Dense) (None, 1, 1, 4) 128 ['reshape_1[0][0]']
dense_3 (Dense) (None, 1, 1, 32) 128 ['dense_2[0][0]']
multiply_1 (Multiply) (None, 256, 256, 32 0 ['add_1[0][0]',
) 'dense_3[0][0]']
batch_normalization_3 (BatchNo (None, 256, 256, 32 128 ['multiply_1[0][0]']
rmalization) )
activation_3 (Activation) (None, 256, 256, 32 0 ['batch_normalization_3[0][0]']
)
conv2d_6 (Conv2D) (None, 128, 128, 64 18496 ['activation_3[0][0]']
)
batch_normalization_4 (BatchNo (None, 128, 128, 64 256 ['conv2d_6[0][0]']
rmalization) )
activation_4 (Activation) (None, 128, 128, 64 0 ['batch_normalization_4[0][0]']
)
conv2d_7 (Conv2D) (None, 128, 128, 64 36928 ['activation_4[0][0]']
)
conv2d_8 (Conv2D) (None, 128, 128, 64 2112 ['multiply_1[0][0]']
)
add_2 (Add) (None, 128, 128, 64 0 ['conv2d_7[0][0]',
) 'conv2d_8[0][0]']
global_average_pooling2d_2 (Gl (None, 64) 0 ['add_2[0][0]']
obalAveragePooling2D)
reshape_2 (Reshape) (None, 1, 1, 64) 0 ['global_average_pooling2d_2[0][0
]']
dense_4 (Dense) (None, 1, 1, 8) 512 ['reshape_2[0][0]']
dense_5 (Dense) (None, 1, 1, 64) 512 ['dense_4[0][0]']
multiply_2 (Multiply) (None, 128, 128, 64 0 ['add_2[0][0]',
) 'dense_5[0][0]']
batch_normalization_5 (BatchNo (None, 128, 128, 64 256 ['multiply_2[0][0]']
rmalization) )
activation_5 (Activation) (None, 128, 128, 64 0 ['batch_normalization_5[0][0]']
)
conv2d_9 (Conv2D) (None, 64, 64, 128) 73856 ['activation_5[0][0]']
batch_normalization_6 (BatchNo (None, 64, 64, 128) 512 ['conv2d_9[0][0]']
rmalization)
activation_6 (Activation) (None, 64, 64, 128) 0 ['batch_normalization_6[0][0]']
conv2d_10 (Conv2D) (None, 64, 64, 128) 147584 ['activation_6[0][0]']
conv2d_11 (Conv2D) (None, 64, 64, 128) 8320 ['multiply_2[0][0]']
add_3 (Add) (None, 64, 64, 128) 0 ['conv2d_10[0][0]',
'conv2d_11[0][0]']
conv2d_12 (Conv2D) (None, 64, 64, 256) 295168 ['add_3[0][0]']
conv2d_13 (Conv2D) (None, 64, 64, 256) 295168 ['add_3[0][0]']
conv2d_14 (Conv2D) (None, 64, 64, 256) 295168 ['add_3[0][0]']
conv2d_15 (Conv2D) (None, 64, 64, 256) 295168 ['add_3[0][0]']
batch_normalization_7 (BatchNo (None, 64, 64, 256) 1024 ['conv2d_12[0][0]']
rmalization)
batch_normalization_8 (BatchNo (None, 64, 64, 256) 1024 ['conv2d_13[0][0]']
rmalization)
batch_normalization_9 (BatchNo (None, 64, 64, 256) 1024 ['conv2d_14[0][0]']
rmalization)
batch_normalization_10 (BatchN (None, 64, 64, 256) 1024 ['conv2d_15[0][0]']
ormalization)
add_4 (Add) (None, 64, 64, 256) 0 ['batch_normalization_7[0][0]',
'batch_normalization_8[0][0]',
'batch_normalization_9[0][0]',
'batch_normalization_10[0][0]']
batch_normalization_11 (BatchN (None, 128, 128, 64 256 ['add_2[0][0]']
ormalization) )
conv2d_16 (Conv2D) (None, 64, 64, 256) 65792 ['add_4[0][0]']
activation_7 (Activation) (None, 128, 128, 64 0 ['batch_normalization_11[0][0]']
)
batch_normalization_12 (BatchN (None, 64, 64, 256) 1024 ['conv2d_16[0][0]']
ormalization)
conv2d_17 (Conv2D) (None, 128, 128, 25 147712 ['activation_7[0][0]']
6)
activation_8 (Activation) (None, 64, 64, 256) 0 ['batch_normalization_12[0][0]']
max_pooling2d (MaxPooling2D) (None, 64, 64, 256) 0 ['conv2d_17[0][0]']
conv2d_18 (Conv2D) (None, 64, 64, 256) 590080 ['activation_8[0][0]']
add_5 (Add) (None, 64, 64, 256) 0 ['max_pooling2d[0][0]',
'conv2d_18[0][0]']
batch_normalization_13 (BatchN (None, 64, 64, 256) 1024 ['add_5[0][0]']
ormalization)
activation_9 (Activation) (None, 64, 64, 256) 0 ['batch_normalization_13[0][0]']
conv2d_19 (Conv2D) (None, 64, 64, 256) 590080 ['activation_9[0][0]']
multiply_3 (Multiply) (None, 64, 64, 256) 0 ['conv2d_19[0][0]',
'conv2d_16[0][0]']
up_sampling2d (UpSampling2D) (None, 128, 128, 25 0 ['multiply_3[0][0]']
6)
concatenate (Concatenate) (None, 128, 128, 32 0 ['up_sampling2d[0][0]',
0) 'add_2[0][0]']
global_average_pooling2d_3 (Gl (None, 320) 0 ['concatenate[0][0]']
obalAveragePooling2D)
reshape_3 (Reshape) (None, 1, 1, 320) 0 ['global_average_pooling2d_3[0][0
]']
dense_6 (Dense) (None, 1, 1, 40) 12800 ['reshape_3[0][0]']
dense_7 (Dense) (None, 1, 1, 320) 12800 ['dense_6[0][0]']
multiply_4 (Multiply) (None, 128, 128, 32 0 ['concatenate[0][0]',
0) 'dense_7[0][0]']
batch_normalization_14 (BatchN (None, 128, 128, 32 1280 ['multiply_4[0][0]']
ormalization) 0)
activation_10 (Activation) (None, 128, 128, 32 0 ['batch_normalization_14[0][0]']
0)
conv2d_20 (Conv2D) (None, 128, 128, 12 368768 ['activation_10[0][0]']
8)
batch_normalization_15 (BatchN (None, 128, 128, 12 512 ['conv2d_20[0][0]']
ormalization) 8)
activation_11 (Activation) (None, 128, 128, 12 0 ['batch_normalization_15[0][0]']
8)
conv2d_21 (Conv2D) (None, 128, 128, 12 147584 ['activation_11[0][0]']
8)
conv2d_22 (Conv2D) (None, 128, 128, 12 41088 ['multiply_4[0][0]']
8)
batch_normalization_16 (BatchN (None, 256, 256, 32 128 ['add_1[0][0]']
ormalization) )
add_6 (Add) (None, 128, 128, 12 0 ['conv2d_21[0][0]',
8) 'conv2d_22[0][0]']
activation_12 (Activation) (None, 256, 256, 32 0 ['batch_normalization_16[0][0]']
)
batch_normalization_17 (BatchN (None, 128, 128, 12 512 ['add_6[0][0]']
ormalization) 8)
conv2d_23 (Conv2D) (None, 256, 256, 12 36992 ['activation_12[0][0]']
8)
activation_13 (Activation) (None, 128, 128, 12 0 ['batch_normalization_17[0][0]']
8)
max_pooling2d_1 (MaxPooling2D) (None, 128, 128, 12 0 ['conv2d_23[0][0]']
8)
conv2d_24 (Conv2D) (None, 128, 128, 12 147584 ['activation_13[0][0]']
8)
add_7 (Add) (None, 128, 128, 12 0 ['max_pooling2d_1[0][0]',
8) 'conv2d_24[0][0]']
batch_normalization_18 (BatchN (None, 128, 128, 12 512 ['add_7[0][0]']
ormalization) 8)
activation_14 (Activation) (None, 128, 128, 12 0 ['batch_normalization_18[0][0]']
8)
conv2d_25 (Conv2D) (None, 128, 128, 12 147584 ['activation_14[0][0]']
8)
multiply_5 (Multiply) (None, 128, 128, 12 0 ['conv2d_25[0][0]',
8) 'add_6[0][0]']
up_sampling2d_1 (UpSampling2D) (None, 256, 256, 12 0 ['multiply_5[0][0]']
8)
concatenate_1 (Concatenate) (None, 256, 256, 16 0 ['up_sampling2d_1[0][0]',
0) 'add_1[0][0]']
global_average_pooling2d_4 (Gl (None, 160) 0 ['concatenate_1[0][0]']
obalAveragePooling2D)
reshape_4 (Reshape) (None, 1, 1, 160) 0 ['global_average_pooling2d_4[0][0
]']
dense_8 (Dense) (None, 1, 1, 20) 3200 ['reshape_4[0][0]']
dense_9 (Dense) (None, 1, 1, 160) 3200 ['dense_8[0][0]']
multiply_6 (Multiply) (None, 256, 256, 16 0 ['concatenate_1[0][0]',
0) 'dense_9[0][0]']
batch_normalization_19 (BatchN (None, 256, 256, 16 640 ['multiply_6[0][0]']
ormalization) 0)
activation_15 (Activation) (None, 256, 256, 16 0 ['batch_normalization_19[0][0]']
0)
conv2d_26 (Conv2D) (None, 256, 256, 64 92224 ['activation_15[0][0]']
)
batch_normalization_20 (BatchN (None, 256, 256, 64 256 ['conv2d_26[0][0]']
ormalization) )
activation_16 (Activation) (None, 256, 256, 64 0 ['batch_normalization_20[0][0]']
)
conv2d_27 (Conv2D) (None, 256, 256, 64 36928 ['activation_16[0][0]']
)
conv2d_28 (Conv2D) (None, 256, 256, 64 10304 ['multiply_6[0][0]']
)
batch_normalization_21 (BatchN (None, 512, 512, 16 64 ['add[0][0]']
ormalization) )
add_8 (Add) (None, 256, 256, 64 0 ['conv2d_27[0][0]',
) 'conv2d_28[0][0]']
activation_17 (Activation) (None, 512, 512, 16 0 ['batch_normalization_21[0][0]']
)
batch_normalization_22 (BatchN (None, 256, 256, 64 256 ['add_8[0][0]']
ormalization) )
conv2d_29 (Conv2D) (None, 512, 512, 64 9280 ['activation_17[0][0]']
)
activation_18 (Activation) (None, 256, 256, 64 0 ['batch_normalization_22[0][0]']
)
max_pooling2d_2 (MaxPooling2D) (None, 256, 256, 64 0 ['conv2d_29[0][0]']
)
conv2d_30 (Conv2D) (None, 256, 256, 64 36928 ['activation_18[0][0]']
)
add_9 (Add) (None, 256, 256, 64 0 ['max_pooling2d_2[0][0]',
) 'conv2d_30[0][0]']
batch_normalization_23 (BatchN (None, 256, 256, 64 256 ['add_9[0][0]']
ormalization) )
activation_19 (Activation) (None, 256, 256, 64 0 ['batch_normalization_23[0][0]']
)
conv2d_31 (Conv2D) (None, 256, 256, 64 36928 ['activation_19[0][0]']
)
multiply_7 (Multiply) (None, 256, 256, 64 0 ['conv2d_31[0][0]',
) 'add_8[0][0]']
up_sampling2d_2 (UpSampling2D) (None, 512, 512, 64 0 ['multiply_7[0][0]']
)
concatenate_2 (Concatenate) (None, 512, 512, 80 0 ['up_sampling2d_2[0][0]',
) 'add[0][0]']
global_average_pooling2d_5 (Gl (None, 80) 0 ['concatenate_2[0][0]']
obalAveragePooling2D)
reshape_5 (Reshape) (None, 1, 1, 80) 0 ['global_average_pooling2d_5[0][0
]']
dense_10 (Dense) (None, 1, 1, 10) 800 ['reshape_5[0][0]']
dense_11 (Dense) (None, 1, 1, 80) 800 ['dense_10[0][0]']
multiply_8 (Multiply) (None, 512, 512, 80 0 ['concatenate_2[0][0]',
) 'dense_11[0][0]']
batch_normalization_24 (BatchN (None, 512, 512, 80 320 ['multiply_8[0][0]']
ormalization) )
activation_20 (Activation) (None, 512, 512, 80 0 ['batch_normalization_24[0][0]']
)
conv2d_32 (Conv2D) (None, 512, 512, 32 23072 ['activation_20[0][0]']
)
batch_normalization_25 (BatchN (None, 512, 512, 32 128 ['conv2d_32[0][0]']
ormalization) )
activation_21 (Activation) (None, 512, 512, 32 0 ['batch_normalization_25[0][0]']
)
conv2d_33 (Conv2D) (None, 512, 512, 32 9248 ['activation_21[0][0]']
)
conv2d_34 (Conv2D) (None, 512, 512, 32 2592 ['multiply_8[0][0]']
)
add_10 (Add) (None, 512, 512, 32 0 ['conv2d_33[0][0]',
) 'conv2d_34[0][0]']
conv2d_35 (Conv2D) (None, 512, 512, 16 4624 ['add_10[0][0]']
)
conv2d_36 (Conv2D) (None, 512, 512, 16 4624 ['add_10[0][0]']
)
conv2d_37 (Conv2D) (None, 512, 512, 16 4624 ['add_10[0][0]']
)
conv2d_38 (Conv2D) (None, 512, 512, 16 4624 ['add_10[0][0]']
)
batch_normalization_26 (BatchN (None, 512, 512, 16 64 ['conv2d_35[0][0]']
ormalization) )
batch_normalization_27 (BatchN (None, 512, 512, 16 64 ['conv2d_36[0][0]']
ormalization) )
batch_normalization_28 (BatchN (None, 512, 512, 16 64 ['conv2d_37[0][0]']
ormalization) )
batch_normalization_29 (BatchN (None, 512, 512, 16 64 ['conv2d_38[0][0]']
ormalization) )
add_11 (Add) (None, 512, 512, 16 0 ['batch_normalization_26[0][0]',
) 'batch_normalization_27[0][0]',
'batch_normalization_28[0][0]',
'batch_normalization_29[0][0]']
conv2d_39 (Conv2D) (None, 512, 512, 16 272 ['add_11[0][0]']
)
conv2d_40 (Conv2D) (None, 512, 512, 3) 51 ['conv2d_39[0][0]']
activation_22 (Activation) (None, 512, 512, 3) 0 ['conv2d_40[0][0]']
==================================================================================================
Total params: 4,092,691
Trainable params: 4,086,227
Non-trainable params: 6,464
__________________________________________________________________________________________________
In [20]:
vgg16unet_model = vgg16_unet(n_classes=n_classes, IMG_HEIGHT=IMG_HEIGHT, IMG_WIDTH=IMG_WIDTH, IMG_CHANNELS=IMG_CHANNELS)
vgg16unet_model.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(LR), metrics = [tf.keras.metrics.OneHotMeanIoU(num_classes=3)])
vgg16unet_model.summary()
Model: "VGG16_UNet"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 512, 512, 3 0 []
)]
block1_conv1 (Conv2D) (None, 512, 512, 64 1792 ['input_1[0][0]']
)
block1_conv2 (Conv2D) (None, 512, 512, 64 36928 ['block1_conv1[0][0]']
)
block1_pool (MaxPooling2D) (None, 256, 256, 64 0 ['block1_conv2[0][0]']
)
block2_conv1 (Conv2D) (None, 256, 256, 12 73856 ['block1_pool[0][0]']
8)
block2_conv2 (Conv2D) (None, 256, 256, 12 147584 ['block2_conv1[0][0]']
8)
block2_pool (MaxPooling2D) (None, 128, 128, 12 0 ['block2_conv2[0][0]']
8)
block3_conv1 (Conv2D) (None, 128, 128, 25 295168 ['block2_pool[0][0]']
6)
block3_conv2 (Conv2D) (None, 128, 128, 25 590080 ['block3_conv1[0][0]']
6)
block3_conv3 (Conv2D) (None, 128, 128, 25 590080 ['block3_conv2[0][0]']
6)
block3_pool (MaxPooling2D) (None, 64, 64, 256) 0 ['block3_conv3[0][0]']
block4_conv1 (Conv2D) (None, 64, 64, 512) 1180160 ['block3_pool[0][0]']
block4_conv2 (Conv2D) (None, 64, 64, 512) 2359808 ['block4_conv1[0][0]']
block4_conv3 (Conv2D) (None, 64, 64, 512) 2359808 ['block4_conv2[0][0]']
block4_pool (MaxPooling2D) (None, 32, 32, 512) 0 ['block4_conv3[0][0]']
block5_conv1 (Conv2D) (None, 32, 32, 512) 2359808 ['block4_pool[0][0]']
block5_conv2 (Conv2D) (None, 32, 32, 512) 2359808 ['block5_conv1[0][0]']
block5_conv3 (Conv2D) (None, 32, 32, 512) 2359808 ['block5_conv2[0][0]']
conv2d_transpose (Conv2DTransp (None, 64, 64, 512) 1049088 ['block5_conv3[0][0]']
ose)
concatenate (Concatenate) (None, 64, 64, 1024 0 ['conv2d_transpose[0][0]',
) 'block4_conv3[0][0]']
conv2d (Conv2D) (None, 64, 64, 512) 4719104 ['concatenate[0][0]']
batch_normalization (BatchNorm (None, 64, 64, 512) 2048 ['conv2d[0][0]']
alization)
activation (Activation) (None, 64, 64, 512) 0 ['batch_normalization[0][0]']
conv2d_1 (Conv2D) (None, 64, 64, 512) 2359808 ['activation[0][0]']
batch_normalization_1 (BatchNo (None, 64, 64, 512) 2048 ['conv2d_1[0][0]']
rmalization)
activation_1 (Activation) (None, 64, 64, 512) 0 ['batch_normalization_1[0][0]']
conv2d_transpose_1 (Conv2DTran (None, 128, 128, 25 524544 ['activation_1[0][0]']
spose) 6)
concatenate_1 (Concatenate) (None, 128, 128, 51 0 ['conv2d_transpose_1[0][0]',
2) 'block3_conv3[0][0]']
conv2d_2 (Conv2D) (None, 128, 128, 25 1179904 ['concatenate_1[0][0]']
6)
batch_normalization_2 (BatchNo (None, 128, 128, 25 1024 ['conv2d_2[0][0]']
rmalization) 6)
activation_2 (Activation) (None, 128, 128, 25 0 ['batch_normalization_2[0][0]']
6)
conv2d_3 (Conv2D) (None, 128, 128, 25 590080 ['activation_2[0][0]']
6)
batch_normalization_3 (BatchNo (None, 128, 128, 25 1024 ['conv2d_3[0][0]']
rmalization) 6)
activation_3 (Activation) (None, 128, 128, 25 0 ['batch_normalization_3[0][0]']
6)
conv2d_transpose_2 (Conv2DTran (None, 256, 256, 12 131200 ['activation_3[0][0]']
spose) 8)
concatenate_2 (Concatenate) (None, 256, 256, 25 0 ['conv2d_transpose_2[0][0]',
6) 'block2_conv2[0][0]']
conv2d_4 (Conv2D) (None, 256, 256, 12 295040 ['concatenate_2[0][0]']
8)
batch_normalization_4 (BatchNo (None, 256, 256, 12 512 ['conv2d_4[0][0]']
rmalization) 8)
activation_4 (Activation) (None, 256, 256, 12 0 ['batch_normalization_4[0][0]']
8)
conv2d_5 (Conv2D) (None, 256, 256, 12 147584 ['activation_4[0][0]']
8)
batch_normalization_5 (BatchNo (None, 256, 256, 12 512 ['conv2d_5[0][0]']
rmalization) 8)
activation_5 (Activation) (None, 256, 256, 12 0 ['batch_normalization_5[0][0]']
8)
conv2d_transpose_3 (Conv2DTran (None, 512, 512, 64 32832 ['activation_5[0][0]']
spose) )
concatenate_3 (Concatenate) (None, 512, 512, 12 0 ['conv2d_transpose_3[0][0]',
8) 'block1_conv2[0][0]']
conv2d_6 (Conv2D) (None, 512, 512, 64 73792 ['concatenate_3[0][0]']
)
batch_normalization_6 (BatchNo (None, 512, 512, 64 256 ['conv2d_6[0][0]']
rmalization) )
activation_6 (Activation) (None, 512, 512, 64 0 ['batch_normalization_6[0][0]']
)
conv2d_7 (Conv2D) (None, 512, 512, 64 36928 ['activation_6[0][0]']
)
batch_normalization_7 (BatchNo (None, 512, 512, 64 256 ['conv2d_7[0][0]']
rmalization) )
activation_7 (Activation) (None, 512, 512, 64 0 ['batch_normalization_7[0][0]']
)
conv2d_8 (Conv2D) (None, 512, 512, 3) 195 ['activation_7[0][0]']
==================================================================================================
Total params: 25,862,467
Trainable params: 25,858,627
Non-trainable params: 3,840
__________________________________________________________________________________________________
Обучение unet¶
In [21]:
history_class = LossHistory()
callbacks = [
ModelCheckpoint("UNet.hdf5", verbose=1, save_best_model=True),
ReduceLROnPlateau(monitor="val_loss", patience=3, factor=0.1, verbose=1, min_lr=1e-6),
EarlyStopping(monitor="val_loss", patience=3, verbose=1,min_delta=0.0001),
TensorBoard(log_dir='logs_unet'),
history_class
]
unet_model_history = unet_model.fit(train_ds,
steps_per_epoch=train_steps,validation_data=val_ds,
validation_steps=valid_steps,epochs=epochs,
callbacks=callbacks,verbose = 1)
unet_model.save('final_UNet.hdf5')
unet_hist_df = pd.DataFrame(unet_model_history.history)
hist_csv_file = 'history_unet.csv'
with open(hist_csv_file, mode='w') as f:
unet_hist_df.to_csv(f)
Epoch 1/50 1077/1077 [==============================] - ETA: 0s - loss: 0.9231 - one_hot_mean_io_u: 0.0412 Epoch 1: saving model to UNet.hdf5 1077/1077 [==============================] - 191s 168ms/step - loss: 0.9231 - one_hot_mean_io_u: 0.0412 - val_loss: 0.9181 - val_one_hot_mean_io_u: 0.0275 - lr: 1.0000e-04 Epoch 2/50 1077/1077 [==============================] - ETA: 0s - loss: 0.9192 - one_hot_mean_io_u: 0.0277 Epoch 2: saving model to UNet.hdf5 1077/1077 [==============================] - 175s 161ms/step - loss: 0.9192 - one_hot_mean_io_u: 0.0277 - val_loss: 0.9178 - val_one_hot_mean_io_u: 0.0282 - lr: 1.0000e-04 Epoch 3/50 1077/1077 [==============================] - ETA: 0s - loss: 0.8647 - one_hot_mean_io_u: 0.2507 Epoch 3: saving model to UNet.hdf5 1077/1077 [==============================] - 174s 161ms/step - loss: 0.8647 - one_hot_mean_io_u: 0.2507 - val_loss: 0.8243 - val_one_hot_mean_io_u: 0.4847 - lr: 1.0000e-04 Epoch 4/50 1077/1077 [==============================] - ETA: 0s - loss: 0.8263 - one_hot_mean_io_u: 0.5144 Epoch 4: saving model to UNet.hdf5 1077/1077 [==============================] - 172s 159ms/step - loss: 0.8263 - one_hot_mean_io_u: 0.5144 - val_loss: 0.8169 - val_one_hot_mean_io_u: 0.5043 - lr: 1.0000e-04 Epoch 5/50 1077/1077 [==============================] - ETA: 0s - loss: 0.8221 - one_hot_mean_io_u: 0.5246 Epoch 5: saving model to UNet.hdf5 1077/1077 [==============================] - 173s 161ms/step - loss: 0.8221 - one_hot_mean_io_u: 0.5246 - val_loss: 0.8201 - val_one_hot_mean_io_u: 0.4887 - lr: 1.0000e-04 Epoch 6/50 1077/1077 [==============================] - ETA: 0s - loss: 0.8222 - one_hot_mean_io_u: 0.5268 Epoch 6: saving model to UNet.hdf5 1077/1077 [==============================] - 171s 159ms/step - loss: 0.8222 - one_hot_mean_io_u: 0.5268 - val_loss: 0.8167 - val_one_hot_mean_io_u: 0.5262 - lr: 1.0000e-04 Epoch 7/50 1077/1077 [==============================] - ETA: 0s - loss: 0.8209 - one_hot_mean_io_u: 0.5348 Epoch 7: saving model to UNet.hdf5 1077/1077 [==============================] - 172s 160ms/step - loss: 0.8209 - one_hot_mean_io_u: 0.5348 - val_loss: 0.8151 - val_one_hot_mean_io_u: 0.5337 - lr: 1.0000e-04 Epoch 8/50 1077/1077 [==============================] - ETA: 0s - loss: 0.8204 - one_hot_mean_io_u: 0.5414 Epoch 8: saving model to UNet.hdf5 1077/1077 [==============================] - 170s 158ms/step - loss: 0.8204 - one_hot_mean_io_u: 0.5414 - val_loss: 0.8130 - val_one_hot_mean_io_u: 0.5325 - lr: 1.0000e-04 Epoch 9/50 1077/1077 [==============================] - ETA: 0s - loss: 0.4552 - one_hot_mean_io_u: 0.6875 Epoch 9: saving model to UNet.hdf5 1077/1077 [==============================] - 173s 160ms/step - loss: 0.4552 - one_hot_mean_io_u: 0.6875 - val_loss: 0.3743 - val_one_hot_mean_io_u: 0.7207 - lr: 1.0000e-04 Epoch 10/50 1077/1077 [==============================] - ETA: 0s - loss: 0.3833 - one_hot_mean_io_u: 0.7296 Epoch 10: saving model to UNet.hdf5 1077/1077 [==============================] - 171s 158ms/step - loss: 0.3833 - one_hot_mean_io_u: 0.7296 - val_loss: 0.3577 - val_one_hot_mean_io_u: 0.7376 - lr: 1.0000e-04 Epoch 11/50 1077/1077 [==============================] - ETA: 0s - loss: 0.3615 - one_hot_mean_io_u: 0.7375 Epoch 11: saving model to UNet.hdf5 1077/1077 [==============================] - 175s 162ms/step - loss: 0.3615 - one_hot_mean_io_u: 0.7375 - val_loss: 0.3280 - val_one_hot_mean_io_u: 0.7510 - lr: 1.0000e-04 Epoch 12/50 1077/1077 [==============================] - ETA: 0s - loss: 0.3421 - one_hot_mean_io_u: 0.7459 Epoch 12: saving model to UNet.hdf5 1077/1077 [==============================] - 172s 160ms/step - loss: 0.3421 - one_hot_mean_io_u: 0.7459 - val_loss: 0.3567 - val_one_hot_mean_io_u: 0.7316 - lr: 1.0000e-04 Epoch 13/50 1077/1077 [==============================] - ETA: 0s - loss: 0.3317 - one_hot_mean_io_u: 0.7515 Epoch 13: saving model to UNet.hdf5 1077/1077 [==============================] - 175s 162ms/step - loss: 0.3317 - one_hot_mean_io_u: 0.7515 - val_loss: 0.3244 - val_one_hot_mean_io_u: 0.7571 - lr: 1.0000e-04 Epoch 14/50 1077/1077 [==============================] - ETA: 0s - loss: 0.3121 - one_hot_mean_io_u: 0.7611 Epoch 14: saving model to UNet.hdf5 1077/1077 [==============================] - 174s 162ms/step - loss: 0.3121 - one_hot_mean_io_u: 0.7611 - val_loss: 0.2853 - val_one_hot_mean_io_u: 0.7721 - lr: 1.0000e-04 Epoch 15/50 1077/1077 [==============================] - ETA: 0s - loss: 0.3038 - one_hot_mean_io_u: 0.7682 Epoch 15: saving model to UNet.hdf5 1077/1077 [==============================] - 174s 162ms/step - loss: 0.3038 - one_hot_mean_io_u: 0.7682 - val_loss: 0.2870 - val_one_hot_mean_io_u: 0.7761 - lr: 1.0000e-04 Epoch 16/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2890 - one_hot_mean_io_u: 0.7728 Epoch 16: saving model to UNet.hdf5 1077/1077 [==============================] - 174s 162ms/step - loss: 0.2890 - one_hot_mean_io_u: 0.7728 - val_loss: 0.2630 - val_one_hot_mean_io_u: 0.7902 - lr: 1.0000e-04 Epoch 17/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2849 - one_hot_mean_io_u: 0.7768 Epoch 17: saving model to UNet.hdf5 1077/1077 [==============================] - 175s 162ms/step - loss: 0.2849 - one_hot_mean_io_u: 0.7768 - val_loss: 0.2927 - val_one_hot_mean_io_u: 0.7665 - lr: 1.0000e-04 Epoch 18/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2717 - one_hot_mean_io_u: 0.7850 Epoch 18: saving model to UNet.hdf5 1077/1077 [==============================] - 174s 162ms/step - loss: 0.2717 - one_hot_mean_io_u: 0.7850 - val_loss: 0.3220 - val_one_hot_mean_io_u: 0.7440 - lr: 1.0000e-04 Epoch 19/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2793 - one_hot_mean_io_u: 0.7798 Epoch 19: saving model to UNet.hdf5 1077/1077 [==============================] - 174s 162ms/step - loss: 0.2793 - one_hot_mean_io_u: 0.7798 - val_loss: 0.2424 - val_one_hot_mean_io_u: 0.8054 - lr: 1.0000e-04 Epoch 20/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2680 - one_hot_mean_io_u: 0.7893 Epoch 20: saving model to UNet.hdf5 1077/1077 [==============================] - 174s 162ms/step - loss: 0.2680 - one_hot_mean_io_u: 0.7893 - val_loss: 0.2590 - val_one_hot_mean_io_u: 0.7816 - lr: 1.0000e-04 Epoch 21/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2568 - one_hot_mean_io_u: 0.7949 Epoch 21: saving model to UNet.hdf5 1077/1077 [==============================] - 175s 162ms/step - loss: 0.2568 - one_hot_mean_io_u: 0.7949 - val_loss: 0.2331 - val_one_hot_mean_io_u: 0.8167 - lr: 1.0000e-04 Epoch 22/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2537 - one_hot_mean_io_u: 0.7975 Epoch 22: saving model to UNet.hdf5 1077/1077 [==============================] - 176s 163ms/step - loss: 0.2537 - one_hot_mean_io_u: 0.7975 - val_loss: 0.2590 - val_one_hot_mean_io_u: 0.7911 - lr: 1.0000e-04 Epoch 23/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2548 - one_hot_mean_io_u: 0.7962 Epoch 23: saving model to UNet.hdf5 1077/1077 [==============================] - 175s 163ms/step - loss: 0.2548 - one_hot_mean_io_u: 0.7962 - val_loss: 0.2394 - val_one_hot_mean_io_u: 0.8013 - lr: 1.0000e-04 Epoch 24/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2422 - one_hot_mean_io_u: 0.8034 Epoch 24: saving model to UNet.hdf5 Epoch 24: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-06. 1077/1077 [==============================] - 176s 163ms/step - loss: 0.2422 - one_hot_mean_io_u: 0.8034 - val_loss: 0.2600 - val_one_hot_mean_io_u: 0.7963 - lr: 1.0000e-04 Epoch 24: early stopping
Обучение resunet_pp¶
In [18]:
history_class = LossHistory()
callbacks = [
ModelCheckpoint("resunet_pp.hdf5", verbose=1, save_best_model=True),
ReduceLROnPlateau(monitor="val_loss", patience=3, factor=0.1, verbose=1, min_lr=1e-6),
EarlyStopping(monitor="val_loss", patience=3, verbose=1,min_delta=0.0001),
TensorBoard(log_dir='logs_resunet_pp'),
history_class
]
resunet_pp_model_history = resunet_pp_model.fit(train_ds,
steps_per_epoch=train_steps,validation_data=val_ds,
validation_steps=valid_steps,epochs=epochs,
callbacks=callbacks,verbose = 1)
resunet_pp_model.save('final_resunet_pp.hdf5')
resunet_pp_hist_df = pd.DataFrame(resunet_pp_model_history.history)
hist_csv_file = 'history_resunet_pp.csv'
with open(hist_csv_file, mode='w') as f:
resunet_pp_hist_df.to_csv(f)
Epoch 1/50 6/1077 [..............................] - ETA: 12:11 - loss: 0.9709 - one_hot_mean_io_u: 0.1835WARNING:tensorflow:Callback method `on_train_batch_end` is slow compared to the batch time (batch time: 0.2407s vs `on_train_batch_end` time: 0.3993s). Check your callbacks. 1077/1077 [==============================] - ETA: 0s - loss: 0.5160 - one_hot_mean_io_u: 0.5531 Epoch 1: saving model to resunet_pp.hdf5 1077/1077 [==============================] - 724s 656ms/step - loss: 0.5160 - one_hot_mean_io_u: 0.5531 - val_loss: 0.4686 - val_one_hot_mean_io_u: 0.6482 - lr: 1.0000e-04 Epoch 2/50 1077/1077 [==============================] - ETA: 0s - loss: 0.3605 - one_hot_mean_io_u: 0.7389 Epoch 2: saving model to resunet_pp.hdf5 1077/1077 [==============================] - 674s 623ms/step - loss: 0.3605 - one_hot_mean_io_u: 0.7389 - val_loss: 0.3569 - val_one_hot_mean_io_u: 0.7421 - lr: 1.0000e-04 Epoch 3/50 1077/1077 [==============================] - ETA: 0s - loss: 0.3075 - one_hot_mean_io_u: 0.7719 Epoch 3: saving model to resunet_pp.hdf5 1077/1077 [==============================] - 658s 611ms/step - loss: 0.3075 - one_hot_mean_io_u: 0.7719 - val_loss: 0.2867 - val_one_hot_mean_io_u: 0.7852 - lr: 1.0000e-04 Epoch 4/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2886 - one_hot_mean_io_u: 0.7936 Epoch 4: saving model to resunet_pp.hdf5 1077/1077 [==============================] - 713s 662ms/step - loss: 0.2886 - one_hot_mean_io_u: 0.7936 - val_loss: 0.2302 - val_one_hot_mean_io_u: 0.8189 - lr: 1.0000e-04 Epoch 5/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2514 - one_hot_mean_io_u: 0.8113 Epoch 5: saving model to resunet_pp.hdf5 1077/1077 [==============================] - 643s 597ms/step - loss: 0.2514 - one_hot_mean_io_u: 0.8113 - val_loss: 0.2757 - val_one_hot_mean_io_u: 0.7587 - lr: 1.0000e-04 Epoch 6/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2433 - one_hot_mean_io_u: 0.8189 Epoch 6: saving model to resunet_pp.hdf5 1077/1077 [==============================] - 639s 594ms/step - loss: 0.2433 - one_hot_mean_io_u: 0.8189 - val_loss: 0.2306 - val_one_hot_mean_io_u: 0.8311 - lr: 1.0000e-04 Epoch 7/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2283 - one_hot_mean_io_u: 0.8290 Epoch 7: saving model to resunet_pp.hdf5 1077/1077 [==============================] - 627s 582ms/step - loss: 0.2283 - one_hot_mean_io_u: 0.8290 - val_loss: 0.2109 - val_one_hot_mean_io_u: 0.8385 - lr: 1.0000e-04 Epoch 8/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2119 - one_hot_mean_io_u: 0.8387 Epoch 8: saving model to resunet_pp.hdf5 1077/1077 [==============================] - 627s 582ms/step - loss: 0.2119 - one_hot_mean_io_u: 0.8387 - val_loss: 0.2065 - val_one_hot_mean_io_u: 0.8411 - lr: 1.0000e-04 Epoch 9/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2166 - one_hot_mean_io_u: 0.8337 Epoch 9: saving model to resunet_pp.hdf5 1077/1077 [==============================] - 629s 584ms/step - loss: 0.2166 - one_hot_mean_io_u: 0.8337 - val_loss: 0.2067 - val_one_hot_mean_io_u: 0.8347 - lr: 1.0000e-04 Epoch 10/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2033 - one_hot_mean_io_u: 0.8448 Epoch 10: saving model to resunet_pp.hdf5 1077/1077 [==============================] - 632s 587ms/step - loss: 0.2033 - one_hot_mean_io_u: 0.8448 - val_loss: 0.1965 - val_one_hot_mean_io_u: 0.8487 - lr: 1.0000e-04 Epoch 11/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2090 - one_hot_mean_io_u: 0.8426 Epoch 11: saving model to resunet_pp.hdf5 1077/1077 [==============================] - 631s 586ms/step - loss: 0.2090 - one_hot_mean_io_u: 0.8426 - val_loss: 0.3780 - val_one_hot_mean_io_u: 0.6695 - lr: 1.0000e-04 Epoch 12/50 1077/1077 [==============================] - ETA: 0s - loss: 0.1861 - one_hot_mean_io_u: 0.8548 Epoch 12: saving model to resunet_pp.hdf5 1077/1077 [==============================] - 630s 585ms/step - loss: 0.1861 - one_hot_mean_io_u: 0.8548 - val_loss: 0.1758 - val_one_hot_mean_io_u: 0.8632 - lr: 1.0000e-04 Epoch 13/50 1077/1077 [==============================] - ETA: 0s - loss: 0.1878 - one_hot_mean_io_u: 0.8578 Epoch 13: saving model to resunet_pp.hdf5 1077/1077 [==============================] - 630s 585ms/step - loss: 0.1878 - one_hot_mean_io_u: 0.8578 - val_loss: 0.1788 - val_one_hot_mean_io_u: 0.8638 - lr: 1.0000e-04 Epoch 14/50 1077/1077 [==============================] - ETA: 0s - loss: 0.1906 - one_hot_mean_io_u: 0.8553 Epoch 14: saving model to resunet_pp.hdf5 1077/1077 [==============================] - 623s 579ms/step - loss: 0.1906 - one_hot_mean_io_u: 0.8553 - val_loss: 0.1779 - val_one_hot_mean_io_u: 0.8630 - lr: 1.0000e-04 Epoch 15/50 1077/1077 [==============================] - ETA: 0s - loss: 0.1767 - one_hot_mean_io_u: 0.8635 Epoch 15: saving model to resunet_pp.hdf5 1077/1077 [==============================] - 619s 575ms/step - loss: 0.1767 - one_hot_mean_io_u: 0.8635 - val_loss: 0.1519 - val_one_hot_mean_io_u: 0.8734 - lr: 1.0000e-04 Epoch 16/50 1077/1077 [==============================] - ETA: 0s - loss: 0.1657 - one_hot_mean_io_u: 0.8679 Epoch 16: saving model to resunet_pp.hdf5 1077/1077 [==============================] - 622s 577ms/step - loss: 0.1657 - one_hot_mean_io_u: 0.8679 - val_loss: 0.1750 - val_one_hot_mean_io_u: 0.8678 - lr: 1.0000e-04 Epoch 17/50 1077/1077 [==============================] - ETA: 0s - loss: 0.1731 - one_hot_mean_io_u: 0.8678 Epoch 17: saving model to resunet_pp.hdf5 1077/1077 [==============================] - 618s 574ms/step - loss: 0.1731 - one_hot_mean_io_u: 0.8678 - val_loss: 0.1756 - val_one_hot_mean_io_u: 0.8681 - lr: 1.0000e-04 Epoch 18/50 1077/1077 [==============================] - ETA: 0s - loss: 0.1770 - one_hot_mean_io_u: 0.8632 Epoch 18: saving model to resunet_pp.hdf5 Epoch 18: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-06. 1077/1077 [==============================] - 619s 575ms/step - loss: 0.1770 - one_hot_mean_io_u: 0.8632 - val_loss: 0.1884 - val_one_hot_mean_io_u: 0.8525 - lr: 1.0000e-04 Epoch 18: early stopping
Обучение vgg16_unet (не трогать!!!)¶
In [21]:
history_class = LossHistory()
callbacks = [
ModelCheckpoint("vgg16unet_model.hdf5", verbose=1, save_best_model=True),
ReduceLROnPlateau(monitor="val_loss", patience=3, factor=0.1, verbose=1, min_lr=1e-6),
EarlyStopping(monitor="val_loss", patience=3, verbose=1,min_delta=0.0001),
TensorBoard(log_dir='logs'),
history_class
]
vgg16unet_model_history = vgg16unet_model.fit(train_ds,
steps_per_epoch=train_steps,validation_data=val_ds,
validation_steps=valid_steps,epochs=epochs,
callbacks=callbacks,verbose = 1)
vgg16unet_model.save('final_vgg16unet_model.hdf5')
vgg16unet_hist_df = pd.DataFrame(vgg16unet_model_history.history)
hist_csv_file = 'history_vgg16unet.csv'
with open(hist_csv_file, mode='w') as f:
vgg16unet_hist_df.to_csv(f)
Epoch 1/50 1077/1077 [==============================] - ETA: 0s - loss: 0.5646 - one_hot_mean_io_u: 0.3920 Epoch 1: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 762s 692ms/step - loss: 0.5646 - one_hot_mean_io_u: 0.3920 - val_loss: 0.4126 - val_one_hot_mean_io_u: 0.6454 - lr: 1.0000e-04 Epoch 2/50 1077/1077 [==============================] - ETA: 0s - loss: 0.3728 - one_hot_mean_io_u: 0.7073 Epoch 2: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 748s 691ms/step - loss: 0.3728 - one_hot_mean_io_u: 0.7073 - val_loss: 0.4258 - val_one_hot_mean_io_u: 0.6973 - lr: 1.0000e-04 Epoch 3/50 1077/1077 [==============================] - ETA: 0s - loss: 0.3402 - one_hot_mean_io_u: 0.7411 Epoch 3: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 744s 691ms/step - loss: 0.3402 - one_hot_mean_io_u: 0.7411 - val_loss: 0.3141 - val_one_hot_mean_io_u: 0.7583 - lr: 1.0000e-04 Epoch 4/50 1077/1077 [==============================] - ETA: 0s - loss: 0.3341 - one_hot_mean_io_u: 0.7493 Epoch 4: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 743s 690ms/step - loss: 0.3341 - one_hot_mean_io_u: 0.7493 - val_loss: 0.3538 - val_one_hot_mean_io_u: 0.7231 - lr: 1.0000e-04 Epoch 5/50 1077/1077 [==============================] - ETA: 0s - loss: 0.3174 - one_hot_mean_io_u: 0.7538 Epoch 5: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 743s 690ms/step - loss: 0.3174 - one_hot_mean_io_u: 0.7538 - val_loss: 0.2991 - val_one_hot_mean_io_u: 0.7750 - lr: 1.0000e-04 Epoch 6/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2942 - one_hot_mean_io_u: 0.7711 Epoch 6: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 742s 689ms/step - loss: 0.2942 - one_hot_mean_io_u: 0.7711 - val_loss: 0.3470 - val_one_hot_mean_io_u: 0.7652 - lr: 1.0000e-04 Epoch 7/50 1077/1077 [==============================] - ETA: 0s - loss: 0.3008 - one_hot_mean_io_u: 0.7639 Epoch 7: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 742s 689ms/step - loss: 0.3008 - one_hot_mean_io_u: 0.7639 - val_loss: 0.4518 - val_one_hot_mean_io_u: 0.6911 - lr: 1.0000e-04 Epoch 8/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2893 - one_hot_mean_io_u: 0.7714 Epoch 8: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 742s 689ms/step - loss: 0.2893 - one_hot_mean_io_u: 0.7714 - val_loss: 0.2716 - val_one_hot_mean_io_u: 0.7814 - lr: 1.0000e-04 Epoch 9/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2729 - one_hot_mean_io_u: 0.7824 Epoch 9: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 743s 690ms/step - loss: 0.2729 - one_hot_mean_io_u: 0.7824 - val_loss: 0.3288 - val_one_hot_mean_io_u: 0.7691 - lr: 1.0000e-04 Epoch 10/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2787 - one_hot_mean_io_u: 0.7835 Epoch 10: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 742s 689ms/step - loss: 0.2787 - one_hot_mean_io_u: 0.7835 - val_loss: 0.4320 - val_one_hot_mean_io_u: 0.6925 - lr: 1.0000e-04 Epoch 11/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2693 - one_hot_mean_io_u: 0.7870 Epoch 11: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 741s 688ms/step - loss: 0.2693 - one_hot_mean_io_u: 0.7870 - val_loss: 0.2679 - val_one_hot_mean_io_u: 0.7875 - lr: 1.0000e-04 Epoch 12/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2421 - one_hot_mean_io_u: 0.8016 Epoch 12: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 748s 694ms/step - loss: 0.2421 - one_hot_mean_io_u: 0.8016 - val_loss: 0.2317 - val_one_hot_mean_io_u: 0.8101 - lr: 1.0000e-04 Epoch 13/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2471 - one_hot_mean_io_u: 0.8023 Epoch 13: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 740s 687ms/step - loss: 0.2471 - one_hot_mean_io_u: 0.8023 - val_loss: 0.2258 - val_one_hot_mean_io_u: 0.8208 - lr: 1.0000e-04 Epoch 14/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2441 - one_hot_mean_io_u: 0.8069 Epoch 14: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 740s 687ms/step - loss: 0.2441 - one_hot_mean_io_u: 0.8069 - val_loss: 0.2234 - val_one_hot_mean_io_u: 0.8243 - lr: 1.0000e-04 Epoch 15/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2391 - one_hot_mean_io_u: 0.8104 Epoch 15: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 740s 687ms/step - loss: 0.2391 - one_hot_mean_io_u: 0.8104 - val_loss: 0.2106 - val_one_hot_mean_io_u: 0.8312 - lr: 1.0000e-04 Epoch 16/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2380 - one_hot_mean_io_u: 0.8090 Epoch 16: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 739s 686ms/step - loss: 0.2380 - one_hot_mean_io_u: 0.8090 - val_loss: 0.2289 - val_one_hot_mean_io_u: 0.8281 - lr: 1.0000e-04 Epoch 17/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2327 - one_hot_mean_io_u: 0.8139 Epoch 17: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 740s 687ms/step - loss: 0.2327 - one_hot_mean_io_u: 0.8139 - val_loss: 0.2217 - val_one_hot_mean_io_u: 0.8260 - lr: 1.0000e-04 Epoch 18/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2225 - one_hot_mean_io_u: 0.8221 Epoch 18: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 740s 687ms/step - loss: 0.2225 - one_hot_mean_io_u: 0.8221 - val_loss: 0.2089 - val_one_hot_mean_io_u: 0.8219 - lr: 1.0000e-04 Epoch 19/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2287 - one_hot_mean_io_u: 0.8165 Epoch 19: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 740s 687ms/step - loss: 0.2287 - one_hot_mean_io_u: 0.8165 - val_loss: 0.2274 - val_one_hot_mean_io_u: 0.8175 - lr: 1.0000e-04 Epoch 20/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2190 - one_hot_mean_io_u: 0.8242 Epoch 20: saving model to vgg16unet_model.hdf5 1077/1077 [==============================] - 739s 686ms/step - loss: 0.2190 - one_hot_mean_io_u: 0.8242 - val_loss: 0.2271 - val_one_hot_mean_io_u: 0.8233 - lr: 1.0000e-04 Epoch 21/50 1077/1077 [==============================] - ETA: 0s - loss: 0.2295 - one_hot_mean_io_u: 0.8154 Epoch 21: saving model to vgg16unet_model.hdf5 Epoch 21: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-06. 1077/1077 [==============================] - 740s 687ms/step - loss: 0.2295 - one_hot_mean_io_u: 0.8154 - val_loss: 0.2522 - val_one_hot_mean_io_u: 0.8001 - lr: 1.0000e-04 Epoch 21: early stopping
--------------------------------------------------------------------------- NameError Traceback (most recent call last) Cell In[21], line 14 1 callbacks = [ 2 ModelCheckpoint("vgg16unet_model.hdf5", verbose=1, save_best_model=True), 3 ReduceLROnPlateau(monitor="val_loss", patience=3, factor=0.1, verbose=1, min_lr=1e-6), (...) 6 history_class 7 ] 9 vgg16unet_model_history = vgg16unet_model.fit(train_ds, 10 steps_per_epoch=train_steps,validation_data=val_ds, 11 validation_steps=valid_steps,epochs=epochs, 12 callbacks=callbacks,verbose = 1) ---> 14 model.save('final_vgg16unet_model.hdf5') NameError: name 'model' is not defined
Визуализация результатов обучения¶
In [14]:
vgg16_unet = tf.keras.models.load_model('models/final_vgg16unet_model.hdf5', compile=False)
unet = tf.keras.models.load_model('models/final_UNet.hdf5', compile=False)
resunet_plus_plus = tf.keras.models.load_model('models/final_resunet_pp.hdf5', compile=False)
In [15]:
unet_history = pd.read_csv('histories/history_unet.csv')
resunet_pp_history = pd.read_csv('histories/history_resunet_pp.csv')
vgg16_unet_history = pd.read_csv('histories/history_vgg16unet.csv')
In [17]:
def plot_history(history, metrics=["one_hot_mean_io_u", "val_one_hot_mean_io_u"], losses=["loss", "val_loss"]):
plt.style.use(['tableau-colorblind10'])
plt.figure(figsize=(12, 6))
for metric in metrics:
plt.plot(history[metric], linewidth=3)
plt.suptitle("metrics over epochs", fontsize=20)
plt.ylabel("metric", fontsize=20)
plt.xlabel("epoch", fontsize=20)
plt.legend(metrics, loc="center right", fontsize=15)
plt.show()
plt.figure(figsize=(12, 6))
for loss in losses:
plt.plot(history[loss], linewidth=3)
plt.suptitle("loss over epochs", fontsize=20)
plt.ylabel("loss", fontsize=20)
plt.xlabel("epoch", fontsize=20)
plt.legend(losses, loc="center right", fontsize=15)
plt.show()
Графики обучения Unet
In [18]:
plot_history(unet_history)
Графики обучения vgg16_unet
In [19]:
plot_history(vgg16_unet_history)
Графики обучения ResUNet_plus_plus
In [20]:
plot_history(resunet_pp_history)
In [20]:
import random
l = random.sample(range(712), 30)
for i in l:
img = test_x[i]
image = read_image(img)
image = np.expand_dims(image,axis=0)
prediction_vgg16 = vgg16_unet.predict(image).squeeze()
prediction_vgg16 = np.argmax(prediction_vgg16, axis=-1).astype(np.int32)
prediction_unet = unet.predict(image).squeeze()
prediction_unet = np.argmax(prediction_unet, axis=-1).astype(np.int32)
prediction_resunet_pp = resunet_plus_plus.predict(image).squeeze()
prediction_resunet_pp = np.argmax(prediction_resunet_pp, axis=-1).astype(np.int32)
visualize(image = read_image(test_x[i]), mask = read_mask(test_y[i]),
prediction_vgg16 = prediction_vgg16,
prediction_unet = prediction_unet,
prediction_resunet_pp = prediction_resunet_pp)